From 53684fff32876d39483673b21fbeea7790f630d5 Mon Sep 17 00:00:00 2001 From: "djm@kirby.fc.hp.com" Date: Thu, 1 Sep 2005 11:09:27 -0600 Subject: [PATCH] Still more cleanup and moving to 2.6.13 base --- xen/arch/ia64/hpsimserial.c | 2 +- xen/arch/ia64/linux-xen/README.origin | 1 + .../asm => arch/ia64/linux-xen}/hpsim_ssc.h | 0 xen/arch/ia64/process.c | 2 +- xen/arch/ia64/xenmisc.c | 28 +++ xen/include/asm-ia64/config.h | 2 + .../asm-ia64/linux-xen/asm/gcc_intrin.h | 74 ++----- xen/include/asm-ia64/linux-xen/asm/ia64regs.h | 33 +--- xen/include/asm-ia64/linux-xen/asm/io.h | 8 - xen/include/asm-ia64/linux-xen/asm/kregs.h | 34 +--- xen/include/asm-ia64/linux-xen/asm/mca_asm.h | 12 +- xen/include/asm-ia64/linux-xen/asm/page.h | 39 +--- xen/include/asm-ia64/linux-xen/asm/pgalloc.h | 181 +++++++----------- .../asm-ia64/linux-xen/asm/processor.h | 42 +--- xen/include/asm-ia64/linux-xen/asm/spinlock.h | 32 +--- xen/include/asm-ia64/linux-xen/asm/system.h | 30 ++- xen/include/asm-ia64/linux-xen/asm/tlbflush.h | 3 + xen/include/asm-ia64/linux-xen/asm/types.h | 33 +--- xen/include/asm-ia64/linux-xen/asm/uaccess.h | 48 ++++- .../asm-ia64/linux-xen/linux/cpumask.h | 22 ++- .../asm-ia64/linux-xen/linux/hardirq.h | 15 +- .../asm-ia64/linux-xen/linux/interrupt.h | 4 +- xen/include/asm-ia64/xengcc_intrin.h | 59 ++++++ xen/include/asm-ia64/xenia64regs.h | 31 +++ xen/include/asm-ia64/xenkregs.h | 37 ++++ xen/include/asm-ia64/xenpage.h | 42 ++++ xen/include/asm-ia64/xenprocessor.h | 16 ++ xen/include/asm-ia64/xenspinlock.h | 30 +++ xen/include/asm-ia64/xensystem.h | 4 + xen/include/asm-ia64/xentypes.h | 29 +++ 30 files changed, 483 insertions(+), 410 deletions(-) rename xen/{include/asm-ia64/linux-xen/asm => arch/ia64/linux-xen}/hpsim_ssc.h (100%) create mode 100644 xen/include/asm-ia64/xengcc_intrin.h create mode 100644 xen/include/asm-ia64/xenia64regs.h create mode 100644 xen/include/asm-ia64/xenkregs.h create mode 100644 xen/include/asm-ia64/xenpage.h create mode 100644 
xen/include/asm-ia64/xenspinlock.h create mode 100644 xen/include/asm-ia64/xentypes.h diff --git a/xen/arch/ia64/hpsimserial.c b/xen/arch/ia64/hpsimserial.c index 3e87aa3332..6fa50fb859 100644 --- a/xen/arch/ia64/hpsimserial.c +++ b/xen/arch/ia64/hpsimserial.c @@ -8,7 +8,7 @@ #include #include #include -#include +#include "hpsim_ssc.h" static void hp_ski_putc(struct serial_port *port, char c) { diff --git a/xen/arch/ia64/linux-xen/README.origin b/xen/arch/ia64/linux-xen/README.origin index ff15575ede..a8c592871a 100644 --- a/xen/arch/ia64/linux-xen/README.origin +++ b/xen/arch/ia64/linux-xen/README.origin @@ -7,6 +7,7 @@ to future versions of the corresponding Linux files. efi.c -> linux/arch/ia64/kernel/efi.c entry.h -> linux/arch/ia64/kernel/entry.h entry.S -> linux/arch/ia64/kernel/entry.S +hpsim_ssc.h -> linux/arch/ia64/hp/sim/hpsim_ssc.h irq_ia64.c -> linux/arch/ia64/kernel/irq_ia64.c minstate.h -> linux/arch/ia64/kernel/minstate.h mm_contig.c -> linux/arch/ia64/mm/contig.c diff --git a/xen/include/asm-ia64/linux-xen/asm/hpsim_ssc.h b/xen/arch/ia64/linux-xen/hpsim_ssc.h similarity index 100% rename from xen/include/asm-ia64/linux-xen/asm/hpsim_ssc.h rename to xen/arch/ia64/linux-xen/hpsim_ssc.h diff --git a/xen/arch/ia64/process.c b/xen/arch/ia64/process.c index 5e80ab041b..ff66c3e51b 100644 --- a/xen/arch/ia64/process.c +++ b/xen/arch/ia64/process.c @@ -28,8 +28,8 @@ #include #include #include -#include #include +#include "hpsim_ssc.h" extern unsigned long vcpu_get_itir_on_fault(struct vcpu *, UINT64); extern struct ia64_sal_retval pal_emulator_static(UINT64); diff --git a/xen/arch/ia64/xenmisc.c b/xen/arch/ia64/xenmisc.c index 701614130e..55f87e45d8 100644 --- a/xen/arch/ia64/xenmisc.c +++ b/xen/arch/ia64/xenmisc.c @@ -176,6 +176,34 @@ void free_page_type(struct pfn_info *page, unsigned int type) dummy(); } +/////////////////////////////// +//// misc memory stuff +/////////////////////////////// + +unsigned long __get_free_pages(unsigned int mask, unsigned 
int order) +{ + void *p = alloc_xenheap_pages(order); + + memset(p,0,PAGE_SIZE<= 4 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4) +# define ia64_popcnt(x) __builtin_popcountl(x) +#else +# define ia64_popcnt(x) \ + ({ \ __u64 ia64_intri_res; \ asm ("popcnt %0=%1" : "=r" (ia64_intri_res) : "r" (x)); \ \ ia64_intri_res; \ -}) + }) +#endif #define ia64_getf_exp(x) \ ({ \ @@ -368,66 +372,6 @@ register unsigned long ia64_r13 asm ("r13") __attribute_used__; #define ia64_mf() asm volatile ("mf" ::: "memory") #define ia64_mfa() asm volatile ("mf.a" ::: "memory") -#ifdef CONFIG_VTI -/* - * Flushrs instruction stream. - */ -#define ia64_flushrs() asm volatile ("flushrs;;":::"memory") - -#define ia64_loadrs() asm volatile ("loadrs;;":::"memory") - -#define ia64_get_rsc() \ -({ \ - unsigned long val; \ - asm volatile ("mov %0=ar.rsc;;" : "=r"(val) :: "memory"); \ - val; \ -}) - -#define ia64_set_rsc(val) \ - asm volatile ("mov ar.rsc=%0;;" :: "r"(val) : "memory") - -#define ia64_get_bspstore() \ -({ \ - unsigned long val; \ - asm volatile ("mov %0=ar.bspstore;;" : "=r"(val) :: "memory"); \ - val; \ -}) - -#define ia64_set_bspstore(val) \ - asm volatile ("mov ar.bspstore=%0;;" :: "r"(val) : "memory") - -#define ia64_get_rnat() \ -({ \ - unsigned long val; \ - asm volatile ("mov %0=ar.rnat;" : "=r"(val) :: "memory"); \ - val; \ -}) - -#define ia64_set_rnat(val) \ - asm volatile ("mov ar.rnat=%0;;" :: "r"(val) : "memory") - -#define ia64_ttag(addr) \ -({ \ - __u64 ia64_intri_res; \ - asm volatile ("ttag %0=%1" : "=r"(ia64_intri_res) : "r" (addr)); \ - ia64_intri_res; \ -}) - -#define ia64_get_dcr() \ -({ \ - __u64 result; \ - asm volatile ("mov %0=cr.dcr" : "=r"(result) : ); \ - result; \ -}) - -#define ia64_set_dcr(val) \ -({ \ - asm volatile ("mov cr.dcr=%0" :: "r"(val) ); \ -}) - -#endif // CONFIG_VTI - - #define ia64_invala() asm volatile ("invala" ::: "memory") #define ia64_thash(addr) \ @@ -654,4 +598,8 @@ do { \ :: "r"((x)) : "p6", "p7", "memory"); \ } while (0) +#ifdef XEN 
+#include +#endif + #endif /* _ASM_IA64_GCC_INTRIN_H */ diff --git a/xen/include/asm-ia64/linux-xen/asm/ia64regs.h b/xen/include/asm-ia64/linux-xen/asm/ia64regs.h index d30d54dacc..a90db1022a 100644 --- a/xen/include/asm-ia64/linux-xen/asm/ia64regs.h +++ b/xen/include/asm-ia64/linux-xen/asm/ia64regs.h @@ -87,35 +87,6 @@ #define _IA64_REG_CR_LRR0 4176 #define _IA64_REG_CR_LRR1 4177 -#ifdef CONFIG_VTI -#define IA64_REG_CR_DCR 0 -#define IA64_REG_CR_ITM 1 -#define IA64_REG_CR_IVA 2 -#define IA64_REG_CR_PTA 8 -#define IA64_REG_CR_IPSR 16 -#define IA64_REG_CR_ISR 17 -#define IA64_REG_CR_IIP 19 -#define IA64_REG_CR_IFA 20 -#define IA64_REG_CR_ITIR 21 -#define IA64_REG_CR_IIPA 22 -#define IA64_REG_CR_IFS 23 -#define IA64_REG_CR_IIM 24 -#define IA64_REG_CR_IHA 25 -#define IA64_REG_CR_LID 64 -#define IA64_REG_CR_IVR 65 -#define IA64_REG_CR_TPR 66 -#define IA64_REG_CR_EOI 67 -#define IA64_REG_CR_IRR0 68 -#define IA64_REG_CR_IRR1 69 -#define IA64_REG_CR_IRR2 70 -#define IA64_REG_CR_IRR3 71 -#define IA64_REG_CR_ITV 72 -#define IA64_REG_CR_PMV 73 -#define IA64_REG_CR_CMCV 74 -#define IA64_REG_CR_LRR0 80 -#define IA64_REG_CR_LRR1 81 -#endif // CONFIG_VTI - /* Indirect Registers for getindreg() and setindreg() */ #define _IA64_REG_INDR_CPUID 9000 /* getindreg only */ @@ -126,4 +97,8 @@ #define _IA64_REG_INDR_PMD 9005 #define _IA64_REG_INDR_RR 9006 +#ifdef XEN +#include +#endif + #endif /* _ASM_IA64_IA64REGS_H */ diff --git a/xen/include/asm-ia64/linux-xen/asm/io.h b/xen/include/asm-ia64/linux-xen/asm/io.h index 74fec0c517..883690df6d 100644 --- a/xen/include/asm-ia64/linux-xen/asm/io.h +++ b/xen/include/asm-ia64/linux-xen/asm/io.h @@ -124,14 +124,6 @@ static inline void ___ia64_mmiowb(void) ia64_mfa(); } -static inline const unsigned long -__ia64_get_io_port_base (void) -{ - extern unsigned long ia64_iobase; - - return ia64_iobase; -} - static inline void* __ia64_mk_io_addr (unsigned long port) { diff --git a/xen/include/asm-ia64/linux-xen/asm/kregs.h 
b/xen/include/asm-ia64/linux-xen/asm/kregs.h index 28b6501cbf..8e0795f0c8 100644 --- a/xen/include/asm-ia64/linux-xen/asm/kregs.h +++ b/xen/include/asm-ia64/linux-xen/asm/kregs.h @@ -29,21 +29,8 @@ */ #define IA64_TR_KERNEL 0 /* itr0, dtr0: maps kernel image (code & data) */ #define IA64_TR_PALCODE 1 /* itr1: maps PALcode as required by EFI */ -#ifdef CONFIG_VTI -#define IA64_TR_XEN_IN_DOM 6 /* itr6, dtr6: Double mapping for xen image in domain space */ -#endif // CONFIG_VTI #define IA64_TR_PERCPU_DATA 1 /* dtr1: percpu data */ #define IA64_TR_CURRENT_STACK 2 /* dtr2: maps kernel's memory- & register-stacks */ -#ifdef XEN -#define IA64_TR_SHARED_INFO 3 /* dtr3: page shared with domain */ -#define IA64_TR_VHPT 4 /* dtr4: vhpt */ -#define IA64_TR_ARCH_INFO 5 -#ifdef CONFIG_VTI -#define IA64_TR_VHPT_IN_DOM 5 /* dtr5: Double mapping for vhpt table in domain space */ -#define IA64_TR_RR7_SWITCH_STUB 7 /* dtr7: mapping for rr7 switch stub */ -#define IA64_TEMP_PHYSICAL 8 /* itr8, dtr8: temp mapping for guest physical memory 256M */ -#endif // CONFIG_VTI -#endif /* Processor status register bits: */ #define IA64_PSR_BE_BIT 1 @@ -79,9 +66,6 @@ #define IA64_PSR_ED_BIT 43 #define IA64_PSR_BN_BIT 44 #define IA64_PSR_IA_BIT 45 -#ifdef CONFIG_VTI -#define IA64_PSR_VM_BIT 46 -#endif // CONFIG_VTI /* A mask of PSR bits that we generally don't want to inherit across a clone2() or an execve(). 
Only list flags here that need to be cleared/set for BOTH clone2() and @@ -123,9 +107,6 @@ #define IA64_PSR_ED (__IA64_UL(1) << IA64_PSR_ED_BIT) #define IA64_PSR_BN (__IA64_UL(1) << IA64_PSR_BN_BIT) #define IA64_PSR_IA (__IA64_UL(1) << IA64_PSR_IA_BIT) -#ifdef CONFIG_VTI -#define IA64_PSR_VM (__IA64_UL(1) << IA64_PSR_VM_BIT) -#endif // CONFIG_VTI /* User mask bits: */ #define IA64_PSR_UM (IA64_PSR_BE | IA64_PSR_UP | IA64_PSR_AC | IA64_PSR_MFL | IA64_PSR_MFH) @@ -180,20 +161,7 @@ #define IA64_ISR_CODE_PROBEF 5 #ifdef XEN -/* Interruption Function State */ -#define IA64_IFS_V_BIT 63 -#define IA64_IFS_V (__IA64_UL(1) << IA64_IFS_V_BIT) - -/* Page Table Address */ -#define IA64_PTA_VE_BIT 0 -#define IA64_PTA_SIZE_BIT 2 -#define IA64_PTA_VF_BIT 8 -#define IA64_PTA_BASE_BIT 15 - -#define IA64_PTA_VE (__IA64_UL(1) << IA64_PTA_VE_BIT) -#define IA64_PTA_SIZE (__IA64_UL(0x3f) << IA64_PTA_SIZE_BIT) -#define IA64_PTA_VF (__IA64_UL(1) << IA64_PTA_VF_BIT) -#define IA64_PTA_BASE (__IA64_UL(0) - ((__IA64_UL(1) << IA64_PTA_BASE_BIT))) +#include #endif #endif /* _ASM_IA64_kREGS_H */ diff --git a/xen/include/asm-ia64/linux-xen/asm/mca_asm.h b/xen/include/asm-ia64/linux-xen/asm/mca_asm.h index c95ee4fcbc..ea5933ecc2 100644 --- a/xen/include/asm-ia64/linux-xen/asm/mca_asm.h +++ b/xen/include/asm-ia64/linux-xen/asm/mca_asm.h @@ -29,10 +29,10 @@ #ifdef XEN #define INST_VA_TO_PA(addr) \ dep addr = 0, addr, 60, 4 -#else // XEN +#else #define INST_VA_TO_PA(addr) \ dep addr = 0, addr, 61, 3 -#endif // XEN +#endif /* * This macro converts a data virtual address to a physical address * Right now for simulation purposes the virtual addresses are @@ -51,15 +51,19 @@ #define DATA_PA_TO_VA(addr,temp) \ mov temp = 0xf ;; \ dep addr = temp, addr, 60, 4 -#else // XEN +#else #define DATA_PA_TO_VA(addr,temp) \ mov temp = 0x7 ;; \ dep addr = temp, addr, 61, 3 -#endif // XEN +#endif +#ifdef XEN +//FIXME LATER +#else #define GET_THIS_PADDR(reg, var) \ mov reg = IA64_KR(PER_CPU_DATA);; \ addl reg = 
THIS_CPU(var), reg +#endif /* * This macro jumps to the instruction at the given virtual address diff --git a/xen/include/asm-ia64/linux-xen/asm/page.h b/xen/include/asm-ia64/linux-xen/asm/page.h index 4a9d3ddb22..f49a8a0bbf 100644 --- a/xen/include/asm-ia64/linux-xen/asm/page.h +++ b/xen/include/asm-ia64/linux-xen/asm/page.h @@ -32,7 +32,6 @@ #define PAGE_ALIGN(addr) (((addr) + PAGE_SIZE - 1) & PAGE_MASK) #define PERCPU_PAGE_SHIFT 16 /* log2() of max. size of per-CPU area */ - #define PERCPU_PAGE_SIZE (__IA64_UL_CONST(1) << PERCPU_PAGE_SHIFT) #define RGN_MAP_LIMIT ((1UL << (4*PAGE_SHIFT - 12)) - PAGE_SIZE) /* per region addr limit */ @@ -96,15 +95,9 @@ extern int ia64_pfn_valid (unsigned long pfn); #endif #ifndef CONFIG_DISCONTIGMEM -#ifdef XEN -# define pfn_valid(pfn) (0) -# define page_to_pfn(_page) ((unsigned long)((_page) - frame_table)) -# define pfn_to_page(_pfn) (frame_table + (_pfn)) -#else # define pfn_valid(pfn) (((pfn) < max_mapnr) && ia64_pfn_valid(pfn)) # define page_to_pfn(page) ((unsigned long) (page - mem_map)) # define pfn_to_page(pfn) (mem_map + (pfn)) -#endif #else extern struct page *vmem_map; extern unsigned long max_low_pfn; @@ -116,11 +109,6 @@ extern unsigned long max_low_pfn; #define page_to_phys(page) (page_to_pfn(page) << PAGE_SHIFT) #define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT) -#ifdef XEN -#define page_to_virt(_page) phys_to_virt(page_to_phys(_page)) -#define phys_to_page(kaddr) pfn_to_page(((kaddr) >> PAGE_SHIFT)) -#endif - typedef union ia64_va { struct { unsigned long off : 61; /* intra-region offset */ @@ -136,23 +124,8 @@ typedef union ia64_va { * expressed in this way to ensure they result in a single "dep" * instruction. */ -#ifdef XEN -typedef union xen_va { - struct { - unsigned long off : 60; - unsigned long reg : 4; - } f; - unsigned long l; - void *p; -} xen_va; - -// xen/drivers/console.c uses __va in a declaration (should be fixed!) 
-#define __pa(x) ({xen_va _v; _v.l = (long) (x); _v.f.reg = 0; _v.l;}) -#define __va(x) ({xen_va _v; _v.l = (long) (x); _v.f.reg = -1; _v.p;}) -#else #define __pa(x) ({ia64_va _v; _v.l = (long) (x); _v.f.reg = 0; _v.l;}) #define __va(x) ({ia64_va _v; _v.l = (long) (x); _v.f.reg = -1; _v.p;}) -#endif #define REGION_NUMBER(x) ({ia64_va _v; _v.l = (long) (x); _v.f.reg;}) #define REGION_OFFSET(x) ({ia64_va _v; _v.l = (long) (x); _v.f.off;}) @@ -164,9 +137,9 @@ typedef union xen_va { # define htlbpage_to_page(x) (((unsigned long) REGION_NUMBER(x) << 61) \ | (REGION_OFFSET(x) >> (HPAGE_SHIFT-PAGE_SHIFT))) # define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT) -# define is_hugepage_only_range(addr, len) \ +# define is_hugepage_only_range(mm, addr, len) \ (REGION_NUMBER(addr) == REGION_HPAGE && \ - REGION_NUMBER((addr)+(len)) == REGION_HPAGE) + REGION_NUMBER((addr)+(len)-1) == REGION_HPAGE) extern unsigned int hpage_shift; #endif @@ -224,15 +197,15 @@ get_order (unsigned long size) # define __pgprot(x) (x) #endif /* !STRICT_MM_TYPECHECKS */ -#ifdef XEN -#define PAGE_OFFSET __IA64_UL_CONST(0xf000000000000000) -#else #define PAGE_OFFSET __IA64_UL_CONST(0xe000000000000000) -#endif #define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | \ VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC | \ (((current->personality & READ_IMPLIES_EXEC) != 0) \ ? VM_EXEC : 0)) +#ifdef XEN +#include +#endif + #endif /* _ASM_IA64_PAGE_H */ diff --git a/xen/include/asm-ia64/linux-xen/asm/pgalloc.h b/xen/include/asm-ia64/linux-xen/asm/pgalloc.h index 09c98cde6e..96a405de96 100644 --- a/xen/include/asm-ia64/linux-xen/asm/pgalloc.h +++ b/xen/include/asm-ia64/linux-xen/asm/pgalloc.h @@ -21,176 +21,127 @@ #include #include -#include -/* - * Very stupidly, we used to get new pgd's and pmd's, init their contents - * to point to the NULL versions of the next level page table, later on - * completely re-init them the same way, then free them up. This wasted - * a lot of work and caused unnecessary memory traffic. 
How broken... - * We fix this by caching them. - */ -#define pgd_quicklist (local_cpu_data->pgd_quick) -#define pmd_quicklist (local_cpu_data->pmd_quick) -#define pgtable_cache_size (local_cpu_data->pgtable_cache_sz) +#ifndef XEN +DECLARE_PER_CPU(unsigned long *, __pgtable_quicklist); +#define pgtable_quicklist __ia64_per_cpu_var(__pgtable_quicklist) +DECLARE_PER_CPU(long, __pgtable_quicklist_size); +#define pgtable_quicklist_size __ia64_per_cpu_var(__pgtable_quicklist_size) + +static inline long pgtable_quicklist_total_size(void) +{ + long ql_size = 0; + int cpuid; + + for_each_online_cpu(cpuid) { + ql_size += per_cpu(__pgtable_quicklist_size, cpuid); + } + return ql_size; +} -static inline pgd_t* -pgd_alloc_one_fast (struct mm_struct *mm) +static inline void *pgtable_quicklist_alloc(void) { unsigned long *ret = NULL; preempt_disable(); - ret = pgd_quicklist; + ret = pgtable_quicklist; if (likely(ret != NULL)) { - pgd_quicklist = (unsigned long *)(*ret); + pgtable_quicklist = (unsigned long *)(*ret); ret[0] = 0; - --pgtable_cache_size; - } else - ret = NULL; - - preempt_enable(); + --pgtable_quicklist_size; + preempt_enable(); + } else { + preempt_enable(); + ret = (unsigned long *)__get_free_page(GFP_KERNEL | __GFP_ZERO); + } - return (pgd_t *) ret; + return ret; } -static inline pgd_t* -pgd_alloc (struct mm_struct *mm) +static inline void pgtable_quicklist_free(void *pgtable_entry) { - /* the VM system never calls pgd_alloc_one_fast(), so we do it here. 
*/ - pgd_t *pgd = pgd_alloc_one_fast(mm); - - if (unlikely(pgd == NULL)) { -#ifdef XEN - pgd = (pgd_t *)alloc_xenheap_page(); - memset(pgd,0,PAGE_SIZE); -#else - pgd = (pgd_t *)__get_free_page(GFP_KERNEL|__GFP_ZERO); -#endif +#ifdef CONFIG_NUMA + unsigned long nid = page_to_nid(virt_to_page(pgtable_entry)); + + if (unlikely(nid != numa_node_id())) { + free_page((unsigned long)pgtable_entry); + return; } - return pgd; -} +#endif -static inline void -pgd_free (pgd_t *pgd) -{ preempt_disable(); - *(unsigned long *)pgd = (unsigned long) pgd_quicklist; - pgd_quicklist = (unsigned long *) pgd; - ++pgtable_cache_size; + *(unsigned long *)pgtable_entry = (unsigned long)pgtable_quicklist; + pgtable_quicklist = (unsigned long *)pgtable_entry; + ++pgtable_quicklist_size; preempt_enable(); } +#endif -static inline void -pud_populate (struct mm_struct *mm, pud_t *pud_entry, pmd_t *pmd) +static inline pgd_t *pgd_alloc(struct mm_struct *mm) { - pud_val(*pud_entry) = __pa(pmd); + return pgtable_quicklist_alloc(); } -static inline pmd_t* -pmd_alloc_one_fast (struct mm_struct *mm, unsigned long addr) +static inline void pgd_free(pgd_t * pgd) { - unsigned long *ret = NULL; - - preempt_disable(); - - ret = (unsigned long *)pmd_quicklist; - if (likely(ret != NULL)) { - pmd_quicklist = (unsigned long *)(*ret); - ret[0] = 0; - --pgtable_cache_size; - } - - preempt_enable(); - - return (pmd_t *)ret; + pgtable_quicklist_free(pgd); } -static inline pmd_t* -pmd_alloc_one (struct mm_struct *mm, unsigned long addr) +static inline void +pud_populate(struct mm_struct *mm, pud_t * pud_entry, pmd_t * pmd) { -#ifdef XEN - pmd_t *pmd = (pmd_t *)alloc_xenheap_page(); - memset(pmd,0,PAGE_SIZE); -#else - pmd_t *pmd = (pmd_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO); -#endif + pud_val(*pud_entry) = __pa(pmd); +} - return pmd; +static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr) +{ + return pgtable_quicklist_alloc(); } -static inline void -pmd_free (pmd_t *pmd) 
+static inline void pmd_free(pmd_t * pmd) { - preempt_disable(); - *(unsigned long *)pmd = (unsigned long) pmd_quicklist; - pmd_quicklist = (unsigned long *) pmd; - ++pgtable_cache_size; - preempt_enable(); + pgtable_quicklist_free(pmd); } #define __pmd_free_tlb(tlb, pmd) pmd_free(pmd) static inline void -pmd_populate (struct mm_struct *mm, pmd_t *pmd_entry, struct page *pte) +pmd_populate(struct mm_struct *mm, pmd_t * pmd_entry, struct page *pte) { pmd_val(*pmd_entry) = page_to_phys(pte); } static inline void -pmd_populate_kernel (struct mm_struct *mm, pmd_t *pmd_entry, pte_t *pte) +pmd_populate_kernel(struct mm_struct *mm, pmd_t * pmd_entry, pte_t * pte) { pmd_val(*pmd_entry) = __pa(pte); } -static inline struct page * -pte_alloc_one (struct mm_struct *mm, unsigned long addr) +static inline struct page *pte_alloc_one(struct mm_struct *mm, + unsigned long addr) { -#ifdef XEN - struct page *pte = alloc_xenheap_page(); - memset(pte,0,PAGE_SIZE); -#else - struct page *pte = alloc_pages(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO, 0); -#endif - - return pte; + return virt_to_page(pgtable_quicklist_alloc()); } -static inline pte_t * -pte_alloc_one_kernel (struct mm_struct *mm, unsigned long addr) +static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm, + unsigned long addr) { -#ifdef XEN - pte_t *pte = (pte_t *)alloc_xenheap_page(); - memset(pte,0,PAGE_SIZE); -#else - pte_t *pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO); -#endif - - return pte; + return pgtable_quicklist_alloc(); } -static inline void -pte_free (struct page *pte) +static inline void pte_free(struct page *pte) { -#ifdef XEN - free_xenheap_page(pte); -#else - __free_page(pte); -#endif + pgtable_quicklist_free(page_address(pte)); } -static inline void -pte_free_kernel (pte_t *pte) +static inline void pte_free_kernel(pte_t * pte) { -#ifdef XEN - free_xenheap_page((unsigned long) pte); -#else - free_page((unsigned long) pte); -#endif + pgtable_quicklist_free(pte); } -#define 
__pte_free_tlb(tlb, pte) tlb_remove_page((tlb), (pte)) +#define __pte_free_tlb(tlb, pte) pte_free(pte) -extern void check_pgt_cache (void); +extern void check_pgt_cache(void); -#endif /* _ASM_IA64_PGALLOC_H */ +#endif /* _ASM_IA64_PGALLOC_H */ diff --git a/xen/include/asm-ia64/linux-xen/asm/processor.h b/xen/include/asm-ia64/linux-xen/asm/processor.h index a35d69a9a8..4b7f22da94 100644 --- a/xen/include/asm-ia64/linux-xen/asm/processor.h +++ b/xen/include/asm-ia64/linux-xen/asm/processor.h @@ -42,14 +42,6 @@ */ #define TASK_SIZE (current->thread.task_size) -/* - * MM_VM_SIZE(mm) gives the maximum address (plus 1) which may contain a mapping for - * address-space MM. Note that with 32-bit tasks, this is still DEFAULT_TASK_SIZE, - * because the kernel may have installed helper-mappings above TASK_SIZE. For example, - * for x86 emulation, the LDT and GDT are mapped above TASK_SIZE. - */ -#define MM_VM_SIZE(mm) DEFAULT_TASK_SIZE - /* * This decides where the kernel will search for a free chunk of vm * space during mmap's. @@ -94,11 +86,10 @@ #ifdef CONFIG_NUMA #include #endif + #ifdef XEN #include -#endif - -#ifndef XEN +#else /* like above but expressed as bitfields for more efficient access: */ struct ia64_psr { __u64 reserved0 : 1; @@ -150,9 +141,6 @@ struct cpuinfo_ia64 { __u64 nsec_per_cyc; /* (1000000000<thread.last_fph_cpu = smp_processor_id(); \ diff --git a/xen/include/asm-ia64/linux-xen/asm/spinlock.h b/xen/include/asm-ia64/linux-xen/asm/spinlock.h index 6082158255..c2d32e7a48 100644 --- a/xen/include/asm-ia64/linux-xen/asm/spinlock.h +++ b/xen/include/asm-ia64/linux-xen/asm/spinlock.h @@ -120,35 +120,6 @@ do { \ #define _raw_spin_trylock(x) (cmpxchg_acq(&(x)->lock, 0, 1) == 0) #define spin_unlock_wait(x) do { barrier(); } while ((x)->lock) -#ifdef XEN -/* - * spin_[un]lock_recursive(): Use these forms when the lock can (safely!) be - * reentered recursively on the same CPU. 
All critical regions that may form - * part of a recursively-nested set must be protected by these forms. If there - * are any critical regions that cannot form part of such a set, they can use - * standard spin_[un]lock(). - */ -#define _raw_spin_lock_recursive(_lock) \ - do { \ - int cpu = smp_processor_id(); \ - if ( likely((_lock)->recurse_cpu != cpu) ) \ - { \ - spin_lock(_lock); \ - (_lock)->recurse_cpu = cpu; \ - } \ - (_lock)->recurse_cnt++; \ - } while ( 0 ) - -#define _raw_spin_unlock_recursive(_lock) \ - do { \ - if ( likely(--(_lock)->recurse_cnt == 0) ) \ - { \ - (_lock)->recurse_cpu = -1; \ - spin_unlock(_lock); \ - } \ - } while ( 0 ) -#endif - typedef struct { volatile unsigned int read_counter : 31; volatile unsigned int write_lock : 1; @@ -238,4 +209,7 @@ do { \ clear_bit(31, (x)); \ }) +#ifdef XEN +#include +#endif #endif /* _ASM_IA64_SPINLOCK_H */ diff --git a/xen/include/asm-ia64/linux-xen/asm/system.h b/xen/include/asm-ia64/linux-xen/asm/system.h index d8d9dd20db..e77f67225f 100644 --- a/xen/include/asm-ia64/linux-xen/asm/system.h +++ b/xen/include/asm-ia64/linux-xen/asm/system.h @@ -18,19 +18,14 @@ #include #include #include -#ifdef XEN -#include -#endif #define GATE_ADDR __IA64_UL_CONST(0xa000000000000000) /* * 0xa000000000000000+2*PERCPU_PAGE_SIZE * - 0xa000000000000000+3*PERCPU_PAGE_SIZE remain unmapped (guard page) */ -#ifndef XEN #define KERNEL_START __IA64_UL_CONST(0xa000000100000000) #define PERCPU_ADDR (-PERCPU_PAGE_SIZE) -#endif #ifndef __ASSEMBLY__ @@ -188,8 +183,6 @@ do { \ #ifdef __KERNEL__ -#define prepare_to_switch() do { } while(0) - #ifdef CONFIG_IA32_SUPPORT # define IS_IA32_PROCESS(regs) (ia64_psr(regs)->is != 0) #else @@ -223,7 +216,6 @@ extern void ia64_load_extra (struct task_struct *task); # define PERFMON_IS_SYSWIDE() (0) #endif -#ifndef XEN #define IA64_HAS_EXTRA_STATE(t) \ ((t)->thread.flags & (IA64_THREAD_DBG_VALID|IA64_THREAD_PM_VALID) \ || IS_IA32_PROCESS(ia64_task_regs(t)) || PERFMON_IS_SYSWIDE()) @@ -236,7 +228,6 
@@ extern void ia64_load_extra (struct task_struct *task); ia64_psr(ia64_task_regs(next))->dfh = !ia64_is_local_fpu_owner(next); \ (last) = ia64_switch_to((next)); \ } while (0) -#endif #ifdef CONFIG_SMP /* @@ -247,9 +238,9 @@ extern void ia64_load_extra (struct task_struct *task); */ # define switch_to(prev,next,last) do { \ if (ia64_psr(ia64_task_regs(prev))->mfh && ia64_is_local_fpu_owner(prev)) { \ - /* ia64_psr(ia64_task_regs(prev))->mfh = 0; */ \ - /* (prev)->thread.flags |= IA64_THREAD_FPH_VALID; */ \ - /* __ia64_save_fpu((prev)->thread.fph); */ \ + ia64_psr(ia64_task_regs(prev))->mfh = 0; \ + (prev)->thread.flags |= IA64_THREAD_FPH_VALID; \ + __ia64_save_fpu((prev)->thread.fph); \ } \ __switch_to(prev, next, last); \ } while (0) @@ -281,19 +272,20 @@ extern void ia64_load_extra (struct task_struct *task); * of that CPU which will not be released, because there we wait for the * tasklist_lock to become available. */ -#define prepare_arch_switch(rq, next) \ -do { \ - spin_lock(&(next)->switch_lock); \ - spin_unlock(&(rq)->lock); \ -} while (0) -#define finish_arch_switch(rq, prev) spin_unlock_irq(&(prev)->switch_lock) -#define task_running(rq, p) ((rq)->curr == (p) || spin_is_locked(&(p)->switch_lock)) +#define __ARCH_WANT_UNLOCKED_CTXSW #define ia64_platform_is(x) (strcmp(x, platform_name) == 0) void cpu_idle_wait(void); + +#define arch_align_stack(x) (x) + #endif /* __KERNEL__ */ #endif /* __ASSEMBLY__ */ +#ifdef XEN +#include +#endif + #endif /* _ASM_IA64_SYSTEM_H */ diff --git a/xen/include/asm-ia64/linux-xen/asm/tlbflush.h b/xen/include/asm-ia64/linux-xen/asm/tlbflush.h index 3cab2a5af2..a53253f8d2 100644 --- a/xen/include/asm-ia64/linux-xen/asm/tlbflush.h +++ b/xen/include/asm-ia64/linux-xen/asm/tlbflush.h @@ -37,6 +37,7 @@ static inline void local_finish_flush_tlb_mm (struct mm_struct *mm) { #ifndef XEN +// FIXME SMP? 
if (mm == current->active_mm) activate_context(mm); #endif @@ -54,6 +55,7 @@ flush_tlb_mm (struct mm_struct *mm) return; #ifndef XEN +// FIXME SMP? mm->context = 0; #endif @@ -81,6 +83,7 @@ flush_tlb_page (struct vm_area_struct *vma, unsigned long addr) if (vma->vm_mm == current->active_mm) ia64_ptcl(addr, (PAGE_SHIFT << 2)); #ifndef XEN +// FIXME SMP? else vma->vm_mm->context = 0; #endif diff --git a/xen/include/asm-ia64/linux-xen/asm/types.h b/xen/include/asm-ia64/linux-xen/asm/types.h index 71b19cbc63..d2c04d3f6a 100644 --- a/xen/include/asm-ia64/linux-xen/asm/types.h +++ b/xen/include/asm-ia64/linux-xen/asm/types.h @@ -1,12 +1,5 @@ #ifndef _ASM_IA64_TYPES_H #define _ASM_IA64_TYPES_H -#ifdef XEN -#ifndef __ASSEMBLY__ -typedef unsigned long ssize_t; -typedef unsigned long size_t; -typedef long long loff_t; -#endif -#endif /* * This file is never included by application software unless explicitly requested (e.g., @@ -68,28 +61,6 @@ typedef __u32 u32; typedef __s64 s64; typedef __u64 u64; -#ifdef XEN -/* - * Below are truly Linux-specific types that should never collide with - * any application/library that wants linux/types.h. - */ - -#ifdef __CHECKER__ -#define __bitwise __attribute__((bitwise)) -#else -#define __bitwise -#endif - -typedef __u16 __bitwise __le16; -typedef __u16 __bitwise __be16; -typedef __u32 __bitwise __le32; -typedef __u32 __bitwise __be32; -#if defined(__GNUC__) && !defined(__STRICT_ANSI__) -typedef __u64 __bitwise __le64; -typedef __u64 __bitwise __be64; -#endif -#endif - #define BITS_PER_LONG 64 /* DMA addresses are 64-bits wide, in general. 
*/ @@ -101,4 +72,8 @@ typedef unsigned short kmem_bufctl_t; # endif /* __KERNEL__ */ #endif /* !__ASSEMBLY__ */ +#ifdef XEN +#include +#endif + #endif /* _ASM_IA64_TYPES_H */ diff --git a/xen/include/asm-ia64/linux-xen/asm/uaccess.h b/xen/include/asm-ia64/linux-xen/asm/uaccess.h index 607faf2151..e206565fd5 100644 --- a/xen/include/asm-ia64/linux-xen/asm/uaccess.h +++ b/xen/include/asm-ia64/linux-xen/asm/uaccess.h @@ -32,16 +32,15 @@ * David Mosberger-Tang */ -#ifdef CONFIG_VTI -#include -#else // CONFIG_VTI - #include #include #include +#include +#include #include #include +#include /* * For historical reasons, the following macros are grossly misnamed: @@ -65,7 +64,6 @@ * point inside the virtually mapped linear page table. */ #ifdef XEN -/* VT-i reserves bit 60 for the VMM; guest addresses have bit 60 = bit 59 */ #define IS_VMM_ADDRESS(addr) ((((addr) >> 60) ^ ((addr) >> 59)) & 1) #define __access_ok(addr, size, segment) (!IS_VMM_ADDRESS((unsigned long)(addr))) #else @@ -79,7 +77,8 @@ #endif #define access_ok(type, addr, size) __access_ok((addr), (size), get_fs()) -static inline int +/* this function will go away soon - use access_ok() instead */ +static inline int __deprecated verify_area (int type, const void __user *addr, unsigned long size) { return access_ok(type, addr, size) ? 0 : -EFAULT; @@ -353,7 +352,6 @@ extern unsigned long __strnlen_user (const char __user *, long); __su_ret; \ }) -#endif // CONFIG_VTI /* Generic code can't deal with the location-relative format that we use for compactness. 
*/ #define ARCH_HAS_SORT_EXTABLE #define ARCH_HAS_SEARCH_EXTABLE @@ -378,4 +376,40 @@ ia64_done_with_exception (struct pt_regs *regs) return 0; } +#ifndef XEN +#define ARCH_HAS_TRANSLATE_MEM_PTR 1 +static __inline__ char * +xlate_dev_mem_ptr (unsigned long p) +{ + struct page *page; + char * ptr; + + page = pfn_to_page(p >> PAGE_SHIFT); + if (PageUncached(page)) + ptr = (char *)p + __IA64_UNCACHED_OFFSET; + else + ptr = __va(p); + + return ptr; +} + +/* + * Convert a virtual cached kernel memory pointer to an uncached pointer + */ +static __inline__ char * +xlate_dev_kmem_ptr (char * p) +{ + struct page *page; + char * ptr; + + page = virt_to_page((unsigned long)p >> PAGE_SHIFT); + if (PageUncached(page)) + ptr = (char *)__pa(p) + __IA64_UNCACHED_OFFSET; + else + ptr = p; + + return ptr; +} +#endif + #endif /* _ASM_IA64_UACCESS_H */ diff --git a/xen/include/asm-ia64/linux-xen/linux/cpumask.h b/xen/include/asm-ia64/linux-xen/linux/cpumask.h index 1aef8198e1..46e552641f 100644 --- a/xen/include/asm-ia64/linux-xen/linux/cpumask.h +++ b/xen/include/asm-ia64/linux-xen/linux/cpumask.h @@ -10,6 +10,8 @@ * * For details of cpumask_scnprintf() and cpumask_parse(), * see bitmap_scnprintf() and bitmap_parse() in lib/bitmap.c. + * For details of cpulist_scnprintf() and cpulist_parse(), see + * bitmap_scnlistprintf() and bitmap_parselist(), also in bitmap.c. 
* * The available cpumask operations are: * @@ -46,6 +48,8 @@ * * int cpumask_scnprintf(buf, len, mask) Format cpumask for printing * int cpumask_parse(ubuf, ulen, mask) Parse ascii string as cpumask + * int cpulist_scnprintf(buf, len, mask) Format cpumask as list for printing + * int cpulist_parse(buf, map) Parse ascii string as cpulist * * for_each_cpu_mask(cpu, mask) for-loop cpu over mask * @@ -268,14 +272,28 @@ static inline int __cpumask_scnprintf(char *buf, int len, return bitmap_scnprintf(buf, len, srcp->bits, nbits); } -#define cpumask_parse(ubuf, ulen, src) \ - __cpumask_parse((ubuf), (ulen), &(src), NR_CPUS) +#define cpumask_parse(ubuf, ulen, dst) \ + __cpumask_parse((ubuf), (ulen), &(dst), NR_CPUS) static inline int __cpumask_parse(const char __user *buf, int len, cpumask_t *dstp, int nbits) { return bitmap_parse(buf, len, dstp->bits, nbits); } +#define cpulist_scnprintf(buf, len, src) \ + __cpulist_scnprintf((buf), (len), &(src), NR_CPUS) +static inline int __cpulist_scnprintf(char *buf, int len, + const cpumask_t *srcp, int nbits) +{ + return bitmap_scnlistprintf(buf, len, srcp->bits, nbits); +} + +#define cpulist_parse(buf, dst) __cpulist_parse((buf), &(dst), NR_CPUS) +static inline int __cpulist_parse(const char *buf, cpumask_t *dstp, int nbits) +{ + return bitmap_parselist(buf, dstp->bits, nbits); +} + #if NR_CPUS > 1 #define for_each_cpu_mask(cpu, mask) \ for ((cpu) = first_cpu(mask); \ diff --git a/xen/include/asm-ia64/linux-xen/linux/hardirq.h b/xen/include/asm-ia64/linux-xen/linux/hardirq.h index 31b91d845b..2431491726 100644 --- a/xen/include/asm-ia64/linux-xen/linux/hardirq.h +++ b/xen/include/asm-ia64/linux-xen/linux/hardirq.h @@ -2,6 +2,7 @@ #define LINUX_HARDIRQ_H #include +#include #include #include #include @@ -43,13 +44,19 @@ #define __IRQ_MASK(x) ((1UL << (x))-1) #define PREEMPT_MASK (__IRQ_MASK(PREEMPT_BITS) << PREEMPT_SHIFT) -#define HARDIRQ_MASK (__IRQ_MASK(HARDIRQ_BITS) << HARDIRQ_SHIFT) #define SOFTIRQ_MASK 
(__IRQ_MASK(SOFTIRQ_BITS) << SOFTIRQ_SHIFT) +#define HARDIRQ_MASK (__IRQ_MASK(HARDIRQ_BITS) << HARDIRQ_SHIFT) #define PREEMPT_OFFSET (1UL << PREEMPT_SHIFT) #define SOFTIRQ_OFFSET (1UL << SOFTIRQ_SHIFT) #define HARDIRQ_OFFSET (1UL << HARDIRQ_SHIFT) +#if PREEMPT_ACTIVE < (1 << (HARDIRQ_SHIFT + HARDIRQ_BITS)) +#ifndef XEN +#error PREEMPT_ACTIVE is too low! +#endif +#endif + #define hardirq_count() (preempt_count() & HARDIRQ_MASK) #define softirq_count() (preempt_count() & SOFTIRQ_MASK) #define irq_count() (preempt_count() & (HARDIRQ_MASK | SOFTIRQ_MASK)) @@ -60,10 +67,10 @@ */ #define in_irq() (hardirq_count()) #define in_softirq() (softirq_count()) -#ifndef XEN -#define in_interrupt() (irq_count()) +#ifdef XEN +#define in_interrupt() 0 // FIXME SMP LATER #else -#define in_interrupt() 0 // FIXME LATER +#define in_interrupt() (irq_count()) #endif #if defined(CONFIG_PREEMPT) && !defined(CONFIG_PREEMPT_BKL) diff --git a/xen/include/asm-ia64/linux-xen/linux/interrupt.h b/xen/include/asm-ia64/linux-xen/linux/interrupt.h index 1f12d7e779..caea47d641 100644 --- a/xen/include/asm-ia64/linux-xen/linux/interrupt.h +++ b/xen/include/asm-ia64/linux-xen/linux/interrupt.h @@ -123,7 +123,9 @@ struct softirq_action }; asmlinkage void do_softirq(void); -//extern void open_softirq(int nr, void (*action)(struct softirq_action*), void *data); +#ifndef XEN +extern void open_softirq(int nr, void (*action)(struct softirq_action*), void *data); +#endif extern void softirq_init(void); #define __raise_softirq_irqoff(nr) do { local_softirq_pending() |= 1UL << (nr); } while (0) extern void FASTCALL(raise_softirq_irqoff(unsigned int nr)); diff --git a/xen/include/asm-ia64/xengcc_intrin.h b/xen/include/asm-ia64/xengcc_intrin.h new file mode 100644 index 0000000000..818fae7da7 --- /dev/null +++ b/xen/include/asm-ia64/xengcc_intrin.h @@ -0,0 +1,59 @@ +#ifndef _ASM_IA64_XENGCC_INTRIN_H +#define _ASM_IA64_XENGCC_INTRIN_H +/* + * Flushrs instruction stream. 
+ */ +#define ia64_flushrs() asm volatile ("flushrs;;":::"memory") + +#define ia64_loadrs() asm volatile ("loadrs;;":::"memory") + +#define ia64_get_rsc() \ +({ \ + unsigned long val; \ + asm volatile ("mov %0=ar.rsc;;" : "=r"(val) :: "memory"); \ + val; \ +}) + +#define ia64_set_rsc(val) \ + asm volatile ("mov ar.rsc=%0;;" :: "r"(val) : "memory") + +#define ia64_get_bspstore() \ +({ \ + unsigned long val; \ + asm volatile ("mov %0=ar.bspstore;;" : "=r"(val) :: "memory"); \ + val; \ +}) + +#define ia64_set_bspstore(val) \ + asm volatile ("mov ar.bspstore=%0;;" :: "r"(val) : "memory") + +#define ia64_get_rnat() \ +({ \ + unsigned long val; \ + asm volatile ("mov %0=ar.rnat;" : "=r"(val) :: "memory"); \ + val; \ +}) + +#define ia64_set_rnat(val) \ + asm volatile ("mov ar.rnat=%0;;" :: "r"(val) : "memory") + +#define ia64_ttag(addr) \ +({ \ + __u64 ia64_intri_res; \ + asm volatile ("ttag %0=%1" : "=r"(ia64_intri_res) : "r" (addr)); \ + ia64_intri_res; \ +}) + +#define ia64_get_dcr() \ +({ \ + __u64 result; \ + asm volatile ("mov %0=cr.dcr" : "=r"(result) : ); \ + result; \ +}) + +#define ia64_set_dcr(val) \ +({ \ + asm volatile ("mov cr.dcr=%0" :: "r"(val) ); \ +}) + +#endif /* _ASM_IA64_XENGCC_INTRIN_H */ diff --git a/xen/include/asm-ia64/xenia64regs.h b/xen/include/asm-ia64/xenia64regs.h new file mode 100644 index 0000000000..099fc7250e --- /dev/null +++ b/xen/include/asm-ia64/xenia64regs.h @@ -0,0 +1,31 @@ +#ifndef _ASM_IA64_XENIA64REGS_H +#define _ASM_IA64_XENIA64REGS_H + +#define IA64_REG_CR_DCR 0 +#define IA64_REG_CR_ITM 1 +#define IA64_REG_CR_IVA 2 +#define IA64_REG_CR_PTA 8 +#define IA64_REG_CR_IPSR 16 +#define IA64_REG_CR_ISR 17 +#define IA64_REG_CR_IIP 19 +#define IA64_REG_CR_IFA 20 +#define IA64_REG_CR_ITIR 21 +#define IA64_REG_CR_IIPA 22 +#define IA64_REG_CR_IFS 23 +#define IA64_REG_CR_IIM 24 +#define IA64_REG_CR_IHA 25 +#define IA64_REG_CR_LID 64 +#define IA64_REG_CR_IVR 65 +#define IA64_REG_CR_TPR 66 +#define IA64_REG_CR_EOI 67 +#define IA64_REG_CR_IRR0 
68 +#define IA64_REG_CR_IRR1 69 +#define IA64_REG_CR_IRR2 70 +#define IA64_REG_CR_IRR3 71 +#define IA64_REG_CR_ITV 72 +#define IA64_REG_CR_PMV 73 +#define IA64_REG_CR_CMCV 74 +#define IA64_REG_CR_LRR0 80 +#define IA64_REG_CR_LRR1 81 + +#endif /* _ASM_IA64_XENIA64REGS_H */ diff --git a/xen/include/asm-ia64/xenkregs.h b/xen/include/asm-ia64/xenkregs.h new file mode 100644 index 0000000000..c2eb14e37c --- /dev/null +++ b/xen/include/asm-ia64/xenkregs.h @@ -0,0 +1,37 @@ +#ifndef _ASM_IA64_XENKREGS_H +#define _ASM_IA64_XENKREGS_H + +/* + * Translation registers: + */ +#define IA64_TR_SHARED_INFO 3 /* dtr3: page shared with domain */ +#define IA64_TR_VHPT 4 /* dtr4: vhpt */ +#define IA64_TR_ARCH_INFO 5 + +#ifdef CONFIG_VTI +#define IA64_TR_VHPT_IN_DOM 5 /* dtr5: Double mapping for vhpt table in domain space */ +#define IA64_TR_XEN_IN_DOM 6 /* itr6, dtr6: Double mapping for xen image in domain space */ +#define IA64_TR_RR7_SWITCH_STUB 7 /* dtr7: mapping for rr7 switch stub */ +#define IA64_TEMP_PHYSICAL 8 /* itr8, dtr8: temp mapping for guest physical memory 256M */ +#endif // CONFIG_VTI + +/* Processor status register bits: */ +#define IA64_PSR_VM_BIT 46 +#define IA64_PSR_VM (__IA64_UL(1) << IA64_PSR_VM_BIT) + +/* Interruption Function State */ +#define IA64_IFS_V_BIT 63 +#define IA64_IFS_V (__IA64_UL(1) << IA64_IFS_V_BIT) + +/* Page Table Address */ +#define IA64_PTA_VE_BIT 0 +#define IA64_PTA_SIZE_BIT 2 +#define IA64_PTA_VF_BIT 8 +#define IA64_PTA_BASE_BIT 15 + +#define IA64_PTA_VE (__IA64_UL(1) << IA64_PTA_VE_BIT) +#define IA64_PTA_SIZE (__IA64_UL(0x3f) << IA64_PTA_SIZE_BIT) +#define IA64_PTA_VF (__IA64_UL(1) << IA64_PTA_VF_BIT) +#define IA64_PTA_BASE (__IA64_UL(0) - ((__IA64_UL(1) << IA64_PTA_BASE_BIT))) + +#endif /* _ASM_IA64_XENKREGS_H */ diff --git a/xen/include/asm-ia64/xenpage.h b/xen/include/asm-ia64/xenpage.h new file mode 100644 index 0000000000..4025eb1e51 --- /dev/null +++ b/xen/include/asm-ia64/xenpage.h @@ -0,0 +1,42 @@ +#ifndef _ASM_IA64_XENPAGE_H 
+#define _ASM_IA64_XENPAGE_H + +#ifdef CONFIG_DISCONTIGMEM +#error "xenpage.h: page macros need to be defined for CONFIG_DISCONTIGMEM" +#endif + +#undef pfn_valid +#undef page_to_pfn +#undef pfn_to_page +# define pfn_valid(pfn) (0) +# define page_to_pfn(_page) ((unsigned long) ((_page) - frame_table)) +# define pfn_to_page(_pfn) (frame_table + (_pfn)) + +#undef page_to_phys +#undef virt_to_page +#define page_to_phys(page) (page_to_pfn(page) << PAGE_SHIFT) +#define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT) + +#define page_to_virt(_page) phys_to_virt(page_to_phys(_page)) +#define phys_to_page(kaddr) pfn_to_page(((kaddr) >> PAGE_SHIFT)) + +#ifndef __ASSEMBLY__ +typedef union xen_va { + struct { + unsigned long off : 60; + unsigned long reg : 4; + } f; + unsigned long l; + void *p; +} xen_va; +#endif + +#undef __pa +#undef __va +#define __pa(x) ({xen_va _v; _v.l = (long) (x); _v.f.reg = 0; _v.l;}) +#define __va(x) ({xen_va _v; _v.l = (long) (x); _v.f.reg = -1; _v.p;}) + +#undef PAGE_OFFSET +#define PAGE_OFFSET __IA64_UL_CONST(0xf000000000000000) + +#endif /* _ASM_IA64_XENPAGE_H */ diff --git a/xen/include/asm-ia64/xenprocessor.h b/xen/include/asm-ia64/xenprocessor.h index 91badbeb51..62c0459360 100644 --- a/xen/include/asm-ia64/xenprocessor.h +++ b/xen/include/asm-ia64/xenprocessor.h @@ -213,4 +213,20 @@ enum { ret; \ }) +typedef union { + struct { + __u64 kr0; + __u64 kr1; + __u64 kr2; + __u64 kr3; + __u64 kr4; + __u64 kr5; + __u64 kr6; + __u64 kr7; + }; + __u64 _kr[8]; +} cpu_kr_ia64_t; + +DECLARE_PER_CPU(cpu_kr_ia64_t, cpu_kr); + #endif // _ASM_IA64_XENPROCESSOR_H diff --git a/xen/include/asm-ia64/xenspinlock.h b/xen/include/asm-ia64/xenspinlock.h new file mode 100644 index 0000000000..d383df4310 --- /dev/null +++ b/xen/include/asm-ia64/xenspinlock.h @@ -0,0 +1,30 @@ +#ifndef _ASM_IA64_XENSPINLOCK_H +#define _ASM_IA64_XENSPINLOCK_H + +/* + * spin_[un]lock_recursive(): Use these forms when the lock can (safely!) 
be + * reentered recursively on the same CPU. All critical regions that may form + * part of a recursively-nested set must be protected by these forms. If there + * are any critical regions that cannot form part of such a set, they can use + * standard spin_[un]lock(). + */ +#define _raw_spin_lock_recursive(_lock) \ + do { \ + int cpu = smp_processor_id(); \ + if ( likely((_lock)->recurse_cpu != cpu) ) \ + { \ + spin_lock(_lock); \ + (_lock)->recurse_cpu = cpu; \ + } \ + (_lock)->recurse_cnt++; \ + } while ( 0 ) + +#define _raw_spin_unlock_recursive(_lock) \ + do { \ + if ( likely(--(_lock)->recurse_cnt == 0) ) \ + { \ + (_lock)->recurse_cpu = -1; \ + spin_unlock(_lock); \ + } \ + } while ( 0 ) +#endif /* _ASM_IA64_XENSPINLOCK_H */ diff --git a/xen/include/asm-ia64/xensystem.h b/xen/include/asm-ia64/xensystem.h index 376f480764..07958f6869 100644 --- a/xen/include/asm-ia64/xensystem.h +++ b/xen/include/asm-ia64/xensystem.h @@ -22,7 +22,9 @@ #endif // CONFIG_VTI #define XEN_START_ADDR 0xf000000000000000 +#undef KERNEL_START #define KERNEL_START 0xf000000004000000 +#undef PERCPU_ADDR #define PERCPU_ADDR 0xf100000000000000-PERCPU_PAGE_SIZE #define SHAREDINFO_ADDR 0xf100000000000000 #define VHPT_ADDR 0xf200000000000000 @@ -31,8 +33,10 @@ #ifndef __ASSEMBLY__ +#undef IA64_HAS_EXTRA_STATE #define IA64_HAS_EXTRA_STATE(t) 0 +#undef __switch_to #ifdef CONFIG_VTI extern struct task_struct *vmx_ia64_switch_to (void *next_task); #define __switch_to(prev,next,last) do { \ diff --git a/xen/include/asm-ia64/xentypes.h b/xen/include/asm-ia64/xentypes.h new file mode 100644 index 0000000000..83ff65890e --- /dev/null +++ b/xen/include/asm-ia64/xentypes.h @@ -0,0 +1,29 @@ +#ifndef _ASM_IA64_XENTYPES_H +#define _ASM_IA64_XENTYPES_H + +#ifndef __ASSEMBLY__ +typedef unsigned long ssize_t; +typedef unsigned long size_t; +typedef long long loff_t; + +#ifdef __KERNEL__ +/* these lines taken from linux/types.h. 
they belong in xen/types.h */ +#ifdef __CHECKER__ +#define __bitwise __attribute__((bitwise)) +#else +#define __bitwise +#endif + +typedef __u16 __bitwise __le16; +typedef __u16 __bitwise __be16; +typedef __u32 __bitwise __le32; +typedef __u32 __bitwise __be32; +#if defined(__GNUC__) && !defined(__STRICT_ANSI__) +typedef __u64 __bitwise __le64; +typedef __u64 __bitwise __be64; +#endif + +# endif /* __KERNEL__ */ +#endif /* !__ASSEMBLY__ */ + +#endif /* _ASM_IA64_XENTYPES_H */ -- 2.30.2